In [2]:
# Requirements
##Installs
def check_dependencies_already_installed() -> bool:
    """Return True when the notebook's extra pip dependencies can already be imported."""
    try:
        import gdown  # noqa: F401 - probe import only, the module itself is unused here
    except ImportError:
        return False
    return True


def install_dependencies():
    # IPython shell magic: installs the notebook's extra pip packages.
    # Only runnable inside a notebook/IPython kernel, not as a plain script.
    !pip install 'tensorflow_addons' 'tensorflow-determinism' 'gdown'


# Install the extra packages only when the probe import failed, so re-running
# this cell on an already-provisioned machine is a no-op.
dependencies_already_installed = check_dependencies_already_installed()
print(f'dependencies_already_installed: {dependencies_already_installed}')
if not dependencies_already_installed:
    install_dependencies()

##Imports
import os
import random
import json
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.distribute.tpu_strategy import TPUStrategy
import tensorflow_addons as tfa
from skimage.metrics import structural_similarity
from scipy.stats import wasserstein_distance
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from PIL import Image
from pathlib import Path
from datetime import datetime
from zipfile import ZipFile


# Environment setup
##Set tensorflow deterministic mode
def set_tf_deterministic_mode():
    """Request deterministic (reproducible) TensorFlow/cuDNN op implementations."""
    for env_var_name in ('TF_DETERMINISTIC_OPS', 'TF_CUDNN_DETERMINISTIC'):
        os.environ[env_var_name] = '1'


##Set random seed
def set_training_random_seed(seed: int):
    """Seed every RNG the training touches: hashing, python, numpy, tensorflow."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)
    print(f'set_training_random_seed() - seed value: {seed}')


##Connect to strongest available device
# Enable deterministic kernels before any TF op runs, so results are reproducible.
set_tf_deterministic_mode()


def choose_strongest_available_device_strategy():
    try:
        tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
        tf.config.experimental_connect_to_cluster(tpu)
        tf.tpu.experimental.initialize_tpu_system(tpu)
        selected_strategy = TPUStrategy(tpu)
    except:
        selected_strategy = tf.distribute.get_strategy()

    print(f"choose_strongest_available_device_strategy() - selected strategy type: {type(selected_strategy).__name__}")

    # todo itay - delete this section so it won't mess up in google colab
    gpu_is_available = any(tf.config.list_physical_devices('GPU'))
    if gpu_is_available:
        !nvidia-smi

    return selected_strategy


# Global strategy used by all model/optimizer construction below.
DEVICE_STRATEGY = choose_strongest_available_device_strategy()

# todo itay - delete
# Pin the exact TF version this notebook was validated against.
assert tf.__version__ == '2.6.4'

##Download competition dataset
# Where the unzipped competition files live when running outside a TPU/GCS setup.
LOCAL_DATASET_FOLDER_PATH = Path('./train_data/gan-getting-started/')


def download_competition_dataset_if_not_present():
    # Download (via gdown) and unzip the dataset only when the local folder is
    # missing. Uses IPython shell magics, so this only works inside a notebook.
    dataset_already_downloaded = LOCAL_DATASET_FOLDER_PATH.exists()
    print(f"dataset_already_downloaded: {dataset_already_downloaded}")
    if not dataset_already_downloaded:
        # note - this is the untouched competition dataset. just uploaded it to the drive so it'll be
        # available via colab as well.
        !gdown '1ZwcoO11NKhYsbuM7hzdSzKjGOnOx6X94'
        !mkdir -p {LOCAL_DATASET_FOLDER_PATH}
        !unzip -o -q ./gan-getting-started.zip -d {LOCAL_DATASET_FOLDER_PATH}


# Fetch the dataset once up-front so every later cell can assume it exists.
download_competition_dataset_if_not_present()


def _choose_30_images(
        original_ordered_monet_dataset: Dataset, method: str,
        experiment_random_seed: int, use_preprocessed_cache: bool
) -> Dataset:
    """Pick 30 monet images from the full dataset using the requested strategy.

    Selection always runs under a fixed seed (42) so the chosen set is
    reproducible across experiments; the caller's experiment seed is restored
    afterwards, even when selection fails.
    """
    set_training_random_seed(seed=42)

    try:
        ordered_monet_images = list(original_ordered_monet_dataset)
        if use_preprocessed_cache:
            # Indices precomputed once per method (the distance-based searches
            # are expensive); keyed by the same method names handled below.
            preprocessed_indices_cache = {
                'random_selection': [203, 266, 152, 9, 233, 226, 196, 109, 5, 175, 237, 57, 218, 45, 182, 221, 289, 211, 148, 165, 78, 113, 249, 250, 104, 42, 281, 295, 157, 238],
                'farthest_images_by_pixel_distance': [57, 299, 113, 74, 160, 193, 139, 283, 93, 56, 90, 196, 108, 34, 45, 40, 249, 87, 240, 106, 218, 208, 2, 289, 16, 11, 155, 52, 292, 272],
                'closest_images_by_pixel_distance': [57, 119, 185, 203, 70, 216, 238, 61, 273, 169, 81, 222, 68, 137, 58, 67, 295, 164, 50, 217, 179, 190, 125, 290, 44, 251, 274, 195, 7, 165],
                'farthest_images_by_structural_distance': [57, 134, 16, 152, 133, 116, 94, 147, 177, 161, 40, 9, 218, 110, 70, 101, 75, 54, 243, 100, 98, 237, 77, 115, 106, 119, 45, 261, 241, 90],
                'closest_images_by_structural_distance': [57, 254, 238, 160, 86, 295, 187, 51, 41, 81, 273, 120, 230, 148, 25, 13, 264, 114, 236, 155, 58, 59, 151, 91, 107, 207, 289, 203, 176, 125],
                'farthest_images_by_earth_movers_distance': [57, 168, 144, 249, 145, 117, 288, 109, 48, 207, 252, 251, 186, 278, 296, 39, 263, 234, 167, 165, 128, 41, 290, 176, 33, 107, 202, 201, 282, 127],
                'closest_images_by_earth_movers_distance': [57, 196, 13, 131, 148, 97, 187, 74, 11, 162, 147, 240, 55, 70, 102, 76, 213, 139, 256, 253, 247, 17, 227, 108, 86, 133, 19, 77, 42, 1],
            }
            assert method in preprocessed_indices_cache, f"unknown method - '{method}'"
            chosen_indices = preprocessed_indices_cache[method]
        elif method == 'random_selection':
            chosen_indices = _pick_random_images(ordered_monet_images, images_count=30)
        else:
            # Method names follow '<farthest|closest>_images_by_<metric>' and map
            # to a (distance function, reverse-the-distance?) pair.
            search_settings_by_method = {
                'farthest_images_by_pixel_distance': (_images_pixel_distance, False),
                'closest_images_by_pixel_distance': (_images_pixel_distance, True),
                'farthest_images_by_structural_distance': (_images_structural_distance, False),
                'closest_images_by_structural_distance': (_images_structural_distance, True),
                'farthest_images_by_earth_movers_distance': (_earth_movers_distance, False),
                'closest_images_by_earth_movers_distance': (_earth_movers_distance, True),
            }
            if method not in search_settings_by_method:
                raise NotImplementedError(f"unknown method - '{method}'")
            metric_func, reverse = search_settings_by_method[method]
            chosen_indices = _pick_images_farthest_from_each_other(
                ordered_monet_images,
                distance_func=metric_func,
                images_count=30,
                reverse_distance=reverse
            )

        chosen_30_images_dataset = Dataset.from_tensor_slices([
            ordered_monet_images[image_idx] for image_idx in chosen_indices
        ])
        _plot_chosen_30_images(chosen_30_images_dataset)
    finally:
        # Restore the caller's seed even when selection raises.
        set_training_random_seed(experiment_random_seed)
    return chosen_30_images_dataset


def _plot_chosen_30_images(chosen_30_images_dataset):
    """Render the 30 selected monet images in one tall column, then close the figure."""
    try:
        sample_shape = list(chosen_30_images_dataset)[0].shape
        print(f'*** Selected 30 train monet photos (shape: {sample_shape}) ***')
        _, axes = plt.subplots(30, 1, figsize=(50, 50))
        for axis, image_tensor in zip(axes, chosen_30_images_dataset):
            # Map [-1, 1] floats back to displayable uint8 pixels; [0] drops the batch dim.
            displayable = (image_tensor * 127.5 + 127.5).numpy()[0].astype(np.uint8)
            axis.imshow(displayable)
        plt.show()
    finally:
        plt.close()


# Load competition dataset
##Load full dataset
def find_competition_dataset_files(local_dataset_folder_path: Path):
    """Locate the monet/photo .tfrec shards, preferring GCS when running on a TPU."""
    if isinstance(DEVICE_STRATEGY, TPUStrategy):
        # A TPU reads the dataset from GCS rather than the local disk.
        from kaggle_datasets import KaggleDatasets
        root_folder = Path(KaggleDatasets().get_gcs_path())
    else:
        root_folder = local_dataset_folder_path

    def _glob_tfrec_files(subfolder: str):
        return tf.io.gfile.glob(str(root_folder / subfolder / '*.tfrec'))

    monet_dataset_files = _glob_tfrec_files('monet_tfrec')
    photo_dataset_files = _glob_tfrec_files('photo_tfrec')
    assert any(monet_dataset_files)
    assert any(photo_dataset_files)
    print(f"found {len(monet_dataset_files)} monet and {len(photo_dataset_files)} photo tfrec files.")

    return monet_dataset_files, photo_dataset_files


def _prepare_image_tensor_for_training(image):
    """Scale uint8 pixels to [-1, 1], fix the shape to 256x256x3, upsample to 320x320."""
    scaled = (tf.cast(image, tf.float32) / 127.5) - 1
    shaped = tf.reshape(scaled, [256, 256, 3])
    return tf.image.resize(shaped, (320, 320), method='bilinear')


def load_tf_records_dataset(tf_record_files) -> Dataset:
    """Parse .tfrec shard files into a dataset of training-ready image tensors."""
    feature_spec = {
        "image_name": tf.io.FixedLenFeature([], tf.string),
        "image": tf.io.FixedLenFeature([], tf.string),
        "target": tf.io.FixedLenFeature([], tf.string)
    }

    def _decode_record(serialized_record):
        parsed = tf.io.parse_single_example(serialized_record, feature_spec)
        decoded = tf.image.decode_jpeg(parsed['image'], channels=3)
        return _prepare_image_tensor_for_training(decoded)

    # Sort the shard list so the dataset order is stable across runs.
    dataset = tf.data.TFRecordDataset(sorted(tf_record_files))
    return dataset.map(_decode_record, num_parallel_calls=tf.data.experimental.AUTOTUNE)


##Pick 30 train monet images strategies
def _pick_images_farthest_from_each_other(
        original_ordered_monet_images: list, distance_func, images_count: int, reverse_distance: bool = False
) -> list:
    """Greedily pick images_count images that are mutually far apart under distance_func.

    With reverse_distance=True the metric's sign is flipped, turning the
    farthest-point search into a closest-point search.
    """
    comparison_shape = (100, 100)

    def _to_comparison_array(image_tensor):
        # Compare small, denormalized uint8 copies to keep the pairwise search cheap.
        denormalized = image_tensor.numpy() * 127.5 + 127.5
        shrunk = tf.image.resize(denormalized, comparison_shape).numpy().astype(np.uint8)
        return shrunk[0]

    if reverse_distance:
        effective_distance = lambda image1, image2: -distance_func(image1, image2)
    else:
        effective_distance = distance_func

    return _incremental_farthest_search(
        original_ordered_monet_images,
        k=images_count,
        distance_func=effective_distance,
        pre_comparison_transformation_func=_to_comparison_array
    )


def _pick_random_images(original_ordered_monet_images: list, images_count: int) -> list:
    chosen_30_images_indices = list(np.random.choice(
        list(range(len(original_ordered_monet_images))), size=images_count, replace=False
    ))
    return chosen_30_images_indices


def _incremental_farthest_search(
        ordered_image_tensors_list, k: int, distance_func, pre_comparison_transformation_func
):
    """Greedy farthest-point sampling: pick k images, each maximizing its minimal
    distance to the already-chosen set.

    The first image is picked uniformly at random (the caller seeds `random`
    beforehand, so results are reproducible). The minimal distance of every
    remaining image to the chosen set is maintained incrementally, so each
    round costs O(n) distance calls instead of recomputing every
    chosen-vs-remaining pair from scratch (the previous version was O(n*k)
    per round and also computed the distance to the first chosen image twice).
    Picks and tie-breaking (lowest index wins) are identical to the original.
    """
    remaining_images = [
        dict(
            orig_image_index=orig_image_index,
            img_tensor=img_tensor,
            img_comparison_array=pre_comparison_transformation_func(img_tensor)
        )
        for orig_image_index, img_tensor in enumerate(ordered_image_tensors_list)
    ]

    chosen_images = [remaining_images.pop(random.randint(0, len(remaining_images) - 1))]
    # min_distances[i] == min distance from remaining_images[i] to any chosen image.
    min_distances = [
        distance_func(image['img_comparison_array'], chosen_images[0]['img_comparison_array'])
        for image in remaining_images
    ]
    for _ in tqdm(list(range(k - 1)), desc='incremental_farthest_search() main loop'):
        # Next pick: the remaining image farthest from the chosen set
        # (first index on ties, matching list.index semantics).
        next_idx = min_distances.index(max(min_distances))
        newly_chosen = remaining_images.pop(next_idx)
        del min_distances[next_idx]
        chosen_images.append(newly_chosen)
        # Only distances to the newly chosen image can lower the minima.
        for i, image in enumerate(remaining_images):
            candidate = distance_func(
                image['img_comparison_array'], newly_chosen['img_comparison_array']
            )
            if candidate < min_distances[i]:
                min_distances[i] = candidate
    return [image['orig_image_index'] for image in chosen_images]


def _images_pixel_distance(image1: np.array, image2: np.array) -> float:
    distance = np.sum((image1.flatten() - image2.flatten()) ** 2)
    return distance


def _images_structural_distance(image1: np.array, image2: np.array) -> float:
    """Negated SSIM score, so higher structural similarity maps to smaller distance."""
    ssim_result = structural_similarity(image1.flatten(), image2.flatten(), full=True)
    return -ssim_result[0]


def _earth_movers_distance(image1: np.array, image2: np.array) -> float:
    """Wasserstein distance between the greyscale intensity histograms of two images."""
    histograms = (
        _calc_image_greyscale_histogram(image1),
        _calc_image_greyscale_histogram(image2),
    )
    return wasserstein_distance(*histograms)


def _calc_image_greyscale_histogram(image: np.array) -> np.array:
    """Normalized 256-bin intensity histogram of the PIL-greyscale version of an image.

    Returns an array of length 256 whose entries sum to 1 (each bin is the
    fraction of pixels with that intensity). Replaces a per-pixel Python double
    loop with np.bincount, which produces the identical counts in one C pass.
    """
    greyscale_image = np.array(Image.fromarray(image).convert('L'))
    # minlength=256 guarantees the full length even when high intensities are absent.
    counts = np.bincount(greyscale_image.ravel(), minlength=256)
    return counts / greyscale_image.size


def down_sample(filters, size, strides=2, padding='same'):
    """Strided Conv2D + LeakyReLU stage, shared by the generators and discriminators."""
    conv = layers.Conv2D(
        filters, size, strides=strides, padding=padding,
        kernel_initializer=tf.random_normal_initializer(0., 0.02), use_bias=False
    )
    return keras.Sequential([conv, layers.LeakyReLU()])


def up_sample(filters, size, strides=2, padding='same', apply_dropout=False):
    """Conv2DTranspose (+ optional Dropout) + ReLU upsampling stage for the generators."""
    stage_layers = [layers.Conv2DTranspose(
        filters, size, strides=strides, padding=padding, use_bias=False,
        kernel_initializer=tf.random_normal_initializer(0., 0.02)
    )]
    if apply_dropout:
        stage_layers.append(layers.Dropout(0.5))
    stage_layers.append(layers.ReLU())
    return keras.Sequential(stage_layers)


def build_generator_model():
    """U-Net style generator: 320x320x3 image -> 320x320x3 image (tanh output),
    with skip connections between matching encoder/decoder stages."""
    inputs = layers.Input(shape=[320, 320, 3])

    # bs = batch size
    encoder_stages = [
        down_sample(64, 4),  # (bs, 160, 160, 64)
        down_sample(128, 4),  # (bs, 80, 80, 128)
        down_sample(256, 4),  # (bs, 40, 40, 256)
        down_sample(512, 4),  # (bs, 20, 20, 512)
        down_sample(512, 4),  # (bs, 10, 10, 512)
        down_sample(512, 4),  # (bs, 5, 5, 512)
        down_sample(512, 4, strides=1, padding='valid'),  # (bs, 2, 2, 512)
        down_sample(512, 4),  # (bs, 1, 1, 512)
    ]

    decoder_stages = [
        up_sample(512, 4, apply_dropout=True),  # (bs, 2, 2, 1024)
        up_sample(512, 4, strides=1, padding='valid', apply_dropout=True),  # (bs, 5, 5, 1024)
        up_sample(512, 4, apply_dropout=True),  # (bs, 10, 10, 1024)
        up_sample(512, 4),  # (bs, 20, 20, 1024)
        up_sample(256, 4),  # (bs, 40, 40, 512)
        up_sample(128, 4),  # (bs, 80, 80, 256)
        up_sample(64, 4),  # (bs, 160, 160, 128)
    ]

    output_layer = layers.Conv2DTranspose(
        3, 4,
        strides=2,
        padding='same',
        kernel_initializer=tf.random_normal_initializer(0., 0.02),
        activation='tanh'
    )  # (bs, 320, 320, 3)

    # Encoder pass, remembering every intermediate activation for the skips.
    features = inputs
    skip_activations = []
    for encoder_stage in encoder_stages:
        features = encoder_stage(features)
        skip_activations.append(features)

    # Decoder pass: concatenate each upsampled activation with the matching
    # encoder activation (deepest first; the bottleneck itself is excluded).
    for decoder_stage, skip in zip(decoder_stages, reversed(skip_activations[:-1])):
        features = decoder_stage(features)
        features = layers.Concatenate()([features, skip])

    return keras.Model(inputs=inputs, outputs=output_layer(features))


"""# Build the discriminator

The discriminator takes in the input image and classifies it as real or fake (generated). Instead of outputing a single node, the discriminator outputs a smaller 2D image with higher pixel values indicating a real classification and lower values indicating a fake classification.
"""


def build_discriminator_model():
    """PatchGAN-style discriminator: maps a 320x320x3 image to a small 2D grid of
    real/fake logits (no output activation - the losses use from_logits=True)."""
    weights_init = tf.random_normal_initializer(0., 0.02)
    input_image = layers.Input(shape=[320, 320, 3], name='input_image')

    # Four strided conv stages with shrinking kernel sizes.
    features = input_image
    for stage_filters, stage_kernel_size in ((64, 5), (128, 4), (256, 3), (256, 2)):
        features = down_sample(stage_filters, stage_kernel_size)(features)

    padded = layers.ZeroPadding2D()(features)
    conv_features = layers.Conv2D(
        512, 4, strides=1,
        kernel_initializer=weights_init,
        use_bias=False
    )(padded)

    activated = layers.LeakyReLU()(conv_features)
    padded_again = layers.ZeroPadding2D()(activated)
    logits = layers.Conv2D(1, 4, strides=1, kernel_initializer=weights_init)(padded_again)

    return tf.keras.Model(inputs=input_image, outputs=logits)


class CycleGan(keras.Model):
    """CycleGAN composed of two generator/discriminator pairs trained jointly.

    m_gen: photo -> monet-style generator; p_gen: monet -> photo-style generator.
    m_disc / p_disc judge real vs generated images of their respective domain.
    train_step combines adversarial, cycle-consistency and identity losses.
    """

    def __init__(
            self,
            monet_generator,
            photo_generator,
            monet_discriminator,
            photo_discriminator,
            lambda_cycle=10,
    ):
        # lambda_cycle scales the cycle-consistency and identity loss terms.
        super(CycleGan, self).__init__()
        self.m_gen = monet_generator
        self.p_gen = photo_generator
        self.m_disc = monet_discriminator
        self.p_disc = photo_discriminator
        self.lambda_cycle = lambda_cycle

    def compile(
            self,
            m_gen_optimizer,
            p_gen_optimizer,
            m_disc_optimizer,
            p_disc_optimizer,
            gen_loss_fn,
            disc_loss_fn,
            cycle_loss_fn,
            identity_loss_fn
    ):
        """Attach one optimizer per sub-model plus the four loss callables."""
        super(CycleGan, self).compile()
        self.m_gen_optimizer = m_gen_optimizer
        self.p_gen_optimizer = p_gen_optimizer
        self.m_disc_optimizer = m_disc_optimizer
        self.p_disc_optimizer = p_disc_optimizer
        self.gen_loss_fn = gen_loss_fn
        self.disc_loss_fn = disc_loss_fn
        self.cycle_loss_fn = cycle_loss_fn
        self.identity_loss_fn = identity_loss_fn
        # NOTE(review): created here rather than in __init__, so train_step
        # implicitly requires compile() to have been called first.
        self._my_hack_history = []

    def train_step(self, batch_data):
        """Run one optimization step on a (real_monet, real_photo) batch.

        Returns a dict with both total generator losses and both discriminator
        losses; the same dict is also appended to self._my_hack_history.
        """
        real_monet, real_photo = batch_data

        # persistent=True: four separate gradient computations are taken from
        # this single tape below.
        with tf.GradientTape(persistent=True) as tape:
            # photo to monet back to photo
            fake_monet = self.m_gen(real_photo, training=True)
            cycled_photo = self.p_gen(fake_monet, training=True)

            # monet to photo back to monet
            fake_photo = self.p_gen(real_monet, training=True)
            cycled_monet = self.m_gen(fake_photo, training=True)

            # generating itself
            same_monet = self.m_gen(real_monet, training=True)
            same_photo = self.p_gen(real_photo, training=True)

            # discriminator used to check, inputing real images
            disc_real_monet = self.m_disc(real_monet, training=True)
            disc_real_photo = self.p_disc(real_photo, training=True)

            # discriminator used to check, inputing fake images
            disc_fake_monet = self.m_disc(fake_monet, training=True)
            disc_fake_photo = self.p_disc(fake_photo, training=True)

            # evaluates generator loss
            monet_gen_loss = self.gen_loss_fn(disc_fake_monet)
            photo_gen_loss = self.gen_loss_fn(disc_fake_photo)

            # evaluates total cycle consistency loss
            total_cycle_loss = self.cycle_loss_fn(real_monet, cycled_monet, self.lambda_cycle) + self.cycle_loss_fn(
                real_photo, cycled_photo, self.lambda_cycle)

            # evaluates total generator loss
            total_monet_gen_loss = monet_gen_loss + total_cycle_loss + self.identity_loss_fn(real_monet, same_monet,
                                                                                             self.lambda_cycle)
            total_photo_gen_loss = photo_gen_loss + total_cycle_loss + self.identity_loss_fn(real_photo, same_photo,
                                                                                             self.lambda_cycle)

            # evaluates discriminator loss
            monet_disc_loss = self.disc_loss_fn(disc_real_monet, disc_fake_monet)
            photo_disc_loss = self.disc_loss_fn(disc_real_photo, disc_fake_photo)

        # Calculate the gradients for generator and discriminator
        monet_generator_gradients = tape.gradient(total_monet_gen_loss,
                                                  self.m_gen.trainable_variables)
        photo_generator_gradients = tape.gradient(total_photo_gen_loss,
                                                  self.p_gen.trainable_variables)

        monet_discriminator_gradients = tape.gradient(monet_disc_loss,
                                                      self.m_disc.trainable_variables)
        photo_discriminator_gradients = tape.gradient(photo_disc_loss,
                                                      self.p_disc.trainable_variables)

        # Apply the gradients to the optimizer
        self.m_gen_optimizer.apply_gradients(zip(monet_generator_gradients,
                                                 self.m_gen.trainable_variables))

        self.p_gen_optimizer.apply_gradients(zip(photo_generator_gradients,
                                                 self.p_gen.trainable_variables))

        self.m_disc_optimizer.apply_gradients(zip(monet_discriminator_gradients,
                                                  self.m_disc.trainable_variables))

        self.p_disc_optimizer.apply_gradients(zip(photo_discriminator_gradients,
                                                  self.p_disc.trainable_variables))

        rett = {
            "monet_gen_loss": total_monet_gen_loss,
            "photo_gen_loss": total_photo_gen_loss,
            "monet_disc_loss": monet_disc_loss,
            "photo_disc_loss": photo_disc_loss
        }
        self._my_hack_history.append(rett)
        return rett


#Graph plotting utils
def plot_cycle_gan_train_losses(train_history):
    """Plot per-epoch mean losses (generators on top, discriminators below) and
    print the final epoch's values as JSON."""
    # Collapse each epoch's per-batch loss values to one mean per epoch.
    epoch_means = {
        loss_name: {
            epoch_idx: batch_losses.flatten().mean()
            for epoch_idx, batch_losses in enumerate(per_epoch_losses)
        }
        for loss_name, per_epoch_losses in train_history.history.items()
    }

    def _plot_losses_panel(axis, title, loss_names):
        # One line per loss name, with a shared legend on the panel.
        axis.set_title(title)
        panel_lines = []
        for loss_name in loss_names:
            per_epoch = epoch_means[loss_name]
            line, = axis.plot(per_epoch.keys(), per_epoch.values(), label=loss_name)
            panel_lines.append(line)
        axis.legend(handles=panel_lines)

    try:
        fig, (top_ax, bottom_ax) = plt.subplots(2, figsize=(12, 12))
        fig.suptitle('Loss vs Epoch')
        _plot_losses_panel(top_ax, 'Generators', ('monet_gen_loss', 'photo_gen_loss'))
        _plot_losses_panel(bottom_ax, 'Discriminators', ('monet_disc_loss', 'photo_disc_loss'))
        plt.tight_layout()
        plt.show()
    finally:
        plt.close()

    final_losses = {
        loss_name: str(per_epoch[max(per_epoch.keys())])
        for loss_name, per_epoch in epoch_means.items()
    }
    print(f"*** trained cycle gan final losses ***\n{json.dumps(final_losses, indent=4)}")


"""# Define loss functions

The discriminator loss function below compares real images to a matrix of 1s and fake images to a matrix of 0s. The perfect discriminator will output all 1s for real images and all 0s for fake images. The discriminator loss outputs the average of the real and generated loss.
"""

"""# Create submission file"""
def create_predictions_for_kaggle_submission(monet_generator: tf.keras.Model, photo_dataset: Dataset):
    """Run the monet generator over every photo and zip the resulting 256x256
    jpgs into /kaggle/working/images.zip (the competition submission format)."""
    scratch_folder = Path(f"/tmp/wip_{datetime.now().strftime('%y_%m_%d__%H_%M_%S')}/")
    scratch_folder.mkdir(parents=True, exist_ok=True)
    output_zip_path = Path('/kaggle/working/images.zip')
    if output_zip_path.exists():
        output_zip_path.unlink()

    with ZipFile(output_zip_path, 'w') as submission_zip, \
            tqdm(total=7_038, desc='generating prediction images for kaggle submission') as progress:
        for image_number, photo in enumerate(photo_dataset, start=1):
            generated = monet_generator(photo, training=False)[0]
            generated = tf.image.resize(generated, (256, 256), method='bilinear')
            # Map [-1, 1] floats back to uint8 pixels.
            pixels = (generated.numpy() * 127.5 + 127.5).astype(np.uint8)
            scratch_file = scratch_folder / f'{image_number}.jpg'
            Image.fromarray(pixels).save(scratch_file)
            submission_zip.write(filename=scratch_file, arcname=scratch_file.name)
            # Only the zip is retained - keep the scratch folder empty as we go.
            scratch_file.unlink()
            progress.update()
            pbar.update()


def experiment_flow(
        choose_30_images_method: str,
        train_settings: dict,
        experiment_random_seed: int,
        create_kaggle_predictions_for_submission: bool = False,
):
    """Run one full experiment: pick 30 monet images with the given method,
    build and train a CycleGAN on them, plot losses and sample predictions,
    and optionally write the Kaggle submission zip.

    train_settings must contain 'optimizer_builder' (a zero-arg factory) and
    'train_epochs' (int).
    """
    set_training_random_seed(experiment_random_seed)

    monet_dataset_files, photo_dataset_files = find_competition_dataset_files(LOCAL_DATASET_FOLDER_PATH)
    original_ordered_monet_dataset = load_tf_records_dataset(monet_dataset_files).batch(1)
    photo_dataset = load_tf_records_dataset(photo_dataset_files).batch(1)

    # use_preprocessed_cache=True: selection uses the precomputed index lists,
    # so it is instant and identical across runs.
    chosen_30_monet_dataset = _choose_30_images(
        original_ordered_monet_dataset, choose_30_images_method,
        experiment_random_seed, use_preprocessed_cache=True
    )

    # All model construction happens inside the strategy scope so variables are
    # placed on the selected device (TPU or default CPU/GPU).
    with DEVICE_STRATEGY.scope():
        monet_generator = build_generator_model()  # transforms photos to Monet-esque paintings
        photo_generator = build_generator_model()  # transforms Monet paintings to be more like photos

        monet_discriminator = build_discriminator_model()  # differentiates real Monet paintings and generated Monet paintings
        photo_discriminator = build_discriminator_model()  # differentiates real photos and generated photos

    with DEVICE_STRATEGY.scope():
        def discriminator_loss(real, generated):
            # Real outputs are compared to all-ones, fakes to all-zeros;
            # Reduction.NONE keeps per-example losses.
            real_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(
                tf.ones_like(real), real)

            generated_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True,
                                                                reduction=tf.keras.losses.Reduction.NONE)(
                tf.zeros_like(generated), generated)

            total_disc_loss = real_loss + generated_loss

            return total_disc_loss * 0.5

    """The generator wants to fool the discriminator into thinking the generated image is real. The perfect generator will have the discriminator output only 1s. Thus, it compares the generated image to a matrix of 1s to find the loss."""

    with DEVICE_STRATEGY.scope():
        def generator_loss(generated):
            return tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(
                tf.ones_like(generated), generated)

    """We want our original photo and the twice transformed photo to be similar to one another. Thus, we can calculate the cycle consistency loss be finding the average of their difference."""

    with DEVICE_STRATEGY.scope():
        def calc_cycle_loss(real_image, cycled_image, lambda_):
            # Mean absolute difference between the original and twice-translated image.
            loss1 = tf.reduce_mean(tf.abs(real_image - cycled_image))

            return lambda_ * loss1

    """The identity loss compares the image with its generator (i.e. photo with photo generator). If given a photo as input, we want it to generate the same image as the image was originally a photo. The identity loss compares the input with the output of the generator."""

    with DEVICE_STRATEGY.scope():
        def identity_loss(real_image, same_image, lambda_):
            loss = tf.reduce_mean(tf.abs(real_image - same_image))
            return lambda_ * 0.5 * loss

    """# Train the CycleGAN

    Let's compile our model. Since we used `tf.keras.Model` to build our CycleGAN, we can just ude the `fit` function to train our model.
    """

    with DEVICE_STRATEGY.scope():
        # One independent optimizer instance per sub-model, all built from the
        # same factory supplied by the caller.
        optimizer_builder = train_settings['optimizer_builder']
        monet_generator_optimizer = optimizer_builder()
        photo_generator_optimizer = optimizer_builder()

        monet_discriminator_optimizer = optimizer_builder()
        photo_discriminator_optimizer = optimizer_builder()

    with DEVICE_STRATEGY.scope():
        cycle_gan_model = CycleGan(
            monet_generator, photo_generator, monet_discriminator, photo_discriminator
        )

        cycle_gan_model.compile(
            m_gen_optimizer=monet_generator_optimizer,
            p_gen_optimizer=photo_generator_optimizer,
            m_disc_optimizer=monet_discriminator_optimizer,
            p_disc_optimizer=photo_discriminator_optimizer,
            gen_loss_fn=generator_loss,
            disc_loss_fn=discriminator_loss,
            cycle_loss_fn=calc_cycle_loss,
            identity_loss_fn=identity_loss
        )

    print(f'\n\n*** running_on_tpu - {isinstance(DEVICE_STRATEGY, TPUStrategy)} ***\n\n')

    # zip pairs each of the 30 monet images with a photo per step.
    train_history = cycle_gan_model.fit(
        tf.data.Dataset.zip((chosen_30_monet_dataset, photo_dataset)),
        epochs=train_settings['train_epochs'],
        verbose=2
    )
    plot_cycle_gan_train_losses(train_history)

    print('*** Show trained model predictions sample ***')
    _, ax = plt.subplots(5, 2, figsize=(12, 12))
    for i, img in enumerate(photo_dataset.take(5)):
        prediction = monet_generator(img, training=False)[0].numpy()
        prediction = (prediction * 127.5 + 127.5).astype(np.uint8)
        img = (img[0] * 127.5 + 127.5).numpy().astype(np.uint8)

        ax[i, 0].imshow(img)
        ax[i, 1].imshow(prediction)
        ax[i, 0].set_title("Input Photo")
        ax[i, 1].set_title("Monet-esque")
        ax[i, 0].axis("off")
        ax[i, 1].axis("off")
    plt.show()

    if create_kaggle_predictions_for_submission:
        create_predictions_for_kaggle_submission(monet_generator, photo_dataset)


# Every 30-image selection strategy to compare - one full training run each.
# Names must match the keys handled by _choose_30_images().
choose_30_images_methods = (
    'random_selection',
    'farthest_images_by_pixel_distance',
    'closest_images_by_pixel_distance',
    'farthest_images_by_structural_distance',
    'closest_images_by_structural_distance',
    'farthest_images_by_earth_movers_distance',
    'closest_images_by_earth_movers_distance',
)

# Driver: run the full experiment once per selection method (7 training runs),
# with the same seed and train settings so the runs are comparable.
base_desc = 'choose_30_images_methods loop'
with tqdm(total=len(choose_30_images_methods), desc=base_desc) as pbar:
    for method in choose_30_images_methods:
        pbar.set_description(f"{base_desc} (curr_method = '{method}')")
        experiment_flow(
            choose_30_images_method=method,
            train_settings=dict(
                train_epochs=40,
                # Zero-arg factory: experiment_flow builds one optimizer per sub-model.
                optimizer_builder=lambda: tf.keras.optimizers.Adam(learning_rate=0.001, decay=0.001)
            ),
            experiment_random_seed=1,
            create_kaggle_predictions_for_submission=False
        )
        pbar.update()
dependencies_already_installed: True
choose_strongest_available_device_strategy() - selected strategy type: _DefaultDistributionStrategy
Wed Feb 15 10:08:05 2023       
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 470.82.01    Driver Version: 470.82.01    CUDA Version: 11.4     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  Tesla P100-PCIE...  Off  | 00000000:00:04.0 Off |                    0 |
| N/A   42C    P0    36W / 250W |  15977MiB / 16280MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+
                                                                               
+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
+-----------------------------------------------------------------------------+
dataset_already_downloaded: True
choose_30_images_methods loop (curr_method = 'random_selection'):   0%|          | 0/7 [00:00<?, ?it/s]
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 10:08:06.353221: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1


*** running_on_tpu - False ***


Epoch 1/40
30/30 - 19s - monet_gen_loss: 6.9626 - photo_gen_loss: 5.9112 - monet_disc_loss: 0.2345 - photo_disc_loss: 0.6435
Epoch 2/40
30/30 - 12s - monet_gen_loss: 6.7633 - photo_gen_loss: 4.6662 - monet_disc_loss: 0.0989 - photo_disc_loss: 0.6746
Epoch 3/40
30/30 - 12s - monet_gen_loss: 4.6350 - photo_gen_loss: 4.8057 - monet_disc_loss: 0.7258 - photo_disc_loss: 0.6197
Epoch 4/40
30/30 - 12s - monet_gen_loss: 3.5104 - photo_gen_loss: 3.1220 - monet_disc_loss: 0.7640 - photo_disc_loss: 0.7535
Epoch 5/40
30/30 - 12s - monet_gen_loss: 3.4218 - photo_gen_loss: 2.8866 - monet_disc_loss: 0.8662 - photo_disc_loss: 0.7112
Epoch 6/40
30/30 - 12s - monet_gen_loss: 3.2593 - photo_gen_loss: 2.6979 - monet_disc_loss: 0.7104 - photo_disc_loss: 0.7792
Epoch 7/40
30/30 - 12s - monet_gen_loss: 3.2395 - photo_gen_loss: 2.6063 - monet_disc_loss: 0.7459 - photo_disc_loss: 0.7625
Epoch 8/40
30/30 - 12s - monet_gen_loss: 3.1849 - photo_gen_loss: 2.5222 - monet_disc_loss: 0.8755 - photo_disc_loss: 0.8341
Epoch 9/40
30/30 - 12s - monet_gen_loss: 3.3647 - photo_gen_loss: 2.4910 - monet_disc_loss: 0.8527 - photo_disc_loss: 0.8187
Epoch 10/40
30/30 - 12s - monet_gen_loss: 3.3144 - photo_gen_loss: 2.7887 - monet_disc_loss: 0.8126 - photo_disc_loss: 0.6342
Epoch 11/40
30/30 - 12s - monet_gen_loss: 3.0709 - photo_gen_loss: 2.5117 - monet_disc_loss: 0.8032 - photo_disc_loss: 0.7544
Epoch 12/40
30/30 - 12s - monet_gen_loss: 3.3065 - photo_gen_loss: 2.7014 - monet_disc_loss: 0.9114 - photo_disc_loss: 0.7117
Epoch 13/40
30/30 - 12s - monet_gen_loss: 3.2505 - photo_gen_loss: 2.4335 - monet_disc_loss: 0.8669 - photo_disc_loss: 0.8110
Epoch 14/40
30/30 - 12s - monet_gen_loss: 3.0201 - photo_gen_loss: 2.3520 - monet_disc_loss: 0.8344 - photo_disc_loss: 0.8064
Epoch 15/40
30/30 - 12s - monet_gen_loss: 3.1055 - photo_gen_loss: 2.3546 - monet_disc_loss: 0.9768 - photo_disc_loss: 0.8247
Epoch 16/40
30/30 - 12s - monet_gen_loss: 2.8504 - photo_gen_loss: 2.3155 - monet_disc_loss: 0.8964 - photo_disc_loss: 0.7060
Epoch 17/40
30/30 - 12s - monet_gen_loss: 3.0314 - photo_gen_loss: 2.3105 - monet_disc_loss: 0.9045 - photo_disc_loss: 0.8191
Epoch 18/40
30/30 - 12s - monet_gen_loss: 3.1208 - photo_gen_loss: 2.4715 - monet_disc_loss: 0.9147 - photo_disc_loss: 0.7057
Epoch 19/40
30/30 - 12s - monet_gen_loss: 2.8780 - photo_gen_loss: 2.1129 - monet_disc_loss: 0.9312 - photo_disc_loss: 0.9064
Epoch 20/40
30/30 - 12s - monet_gen_loss: 2.9279 - photo_gen_loss: 2.4213 - monet_disc_loss: 0.9045 - photo_disc_loss: 0.6794
Epoch 21/40
30/30 - 12s - monet_gen_loss: 2.9812 - photo_gen_loss: 2.2528 - monet_disc_loss: 0.9079 - photo_disc_loss: 0.8212
Epoch 22/40
30/30 - 12s - monet_gen_loss: 2.8172 - photo_gen_loss: 2.1002 - monet_disc_loss: 0.8932 - photo_disc_loss: 0.8550
Epoch 23/40
30/30 - 12s - monet_gen_loss: 2.7932 - photo_gen_loss: 2.2246 - monet_disc_loss: 0.9597 - photo_disc_loss: 0.7914
Epoch 24/40
30/30 - 12s - monet_gen_loss: 2.8371 - photo_gen_loss: 2.2055 - monet_disc_loss: 0.7947 - photo_disc_loss: 0.8277
Epoch 25/40
30/30 - 12s - monet_gen_loss: 3.0438 - photo_gen_loss: 2.4541 - monet_disc_loss: 0.9631 - photo_disc_loss: 0.7323
Epoch 26/40
30/30 - 12s - monet_gen_loss: 2.8756 - photo_gen_loss: 2.1502 - monet_disc_loss: 0.8776 - photo_disc_loss: 0.8439
Epoch 27/40
30/30 - 12s - monet_gen_loss: 2.9542 - photo_gen_loss: 2.4061 - monet_disc_loss: 0.9210 - photo_disc_loss: 0.6947
Epoch 28/40
30/30 - 12s - monet_gen_loss: 2.8047 - photo_gen_loss: 2.0609 - monet_disc_loss: 0.8612 - photo_disc_loss: 0.8932
Epoch 29/40
30/30 - 12s - monet_gen_loss: 2.8905 - photo_gen_loss: 2.3097 - monet_disc_loss: 0.9428 - photo_disc_loss: 0.7569
Epoch 30/40
30/30 - 12s - monet_gen_loss: 2.7459 - photo_gen_loss: 2.1800 - monet_disc_loss: 0.8869 - photo_disc_loss: 0.8299
Epoch 31/40
30/30 - 12s - monet_gen_loss: 2.8500 - photo_gen_loss: 2.3103 - monet_disc_loss: 0.8642 - photo_disc_loss: 0.7043
Epoch 32/40
30/30 - 12s - monet_gen_loss: 2.8857 - photo_gen_loss: 2.1102 - monet_disc_loss: 0.9202 - photo_disc_loss: 0.8771
Epoch 33/40
30/30 - 12s - monet_gen_loss: 2.7054 - photo_gen_loss: 2.2203 - monet_disc_loss: 0.8380 - photo_disc_loss: 0.7105
Epoch 34/40
30/30 - 12s - monet_gen_loss: 2.7378 - photo_gen_loss: 2.1267 - monet_disc_loss: 0.8733 - photo_disc_loss: 0.8300
Epoch 35/40
30/30 - 12s - monet_gen_loss: 2.6889 - photo_gen_loss: 2.1963 - monet_disc_loss: 0.8667 - photo_disc_loss: 0.7326
Epoch 36/40
30/30 - 12s - monet_gen_loss: 2.7621 - photo_gen_loss: 2.1806 - monet_disc_loss: 0.8704 - photo_disc_loss: 0.7980
Epoch 37/40
30/30 - 12s - monet_gen_loss: 2.7949 - photo_gen_loss: 2.1478 - monet_disc_loss: 0.9164 - photo_disc_loss: 0.7865
Epoch 38/40
30/30 - 12s - monet_gen_loss: 2.6348 - photo_gen_loss: 2.1189 - monet_disc_loss: 0.8554 - photo_disc_loss: 0.7779
Epoch 39/40
30/30 - 12s - monet_gen_loss: 2.9706 - photo_gen_loss: 2.2352 - monet_disc_loss: 1.0323 - photo_disc_loss: 0.7970
Epoch 40/40
30/30 - 12s - monet_gen_loss: 2.5987 - photo_gen_loss: 2.1137 - monet_disc_loss: 0.9298 - photo_disc_loss: 0.8303
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "2.598734",
    "photo_gen_loss": "2.1136844",
    "monet_disc_loss": "0.929799",
    "photo_disc_loss": "0.83030796"
}
*** Show trained model predictions sample ***
choose_30_images_methods loop (curr_method = 'farthest_images_by_pixel_distance'):  14%|█▍        | 1/7 [13:34<1:21:29, 814.87s/it]
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 10:21:41.146216: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1


*** running_on_tpu - False ***


Epoch 1/40
30/30 - 19s - monet_gen_loss: 4.4588 - photo_gen_loss: 4.3893 - monet_disc_loss: 0.9679 - photo_disc_loss: 0.6927
Epoch 2/40
30/30 - 12s - monet_gen_loss: 4.7791 - photo_gen_loss: 4.1088 - monet_disc_loss: 0.6077 - photo_disc_loss: 0.7131
Epoch 3/40
30/30 - 12s - monet_gen_loss: 3.7342 - photo_gen_loss: 3.2213 - monet_disc_loss: 0.6502 - photo_disc_loss: 0.7103
Epoch 4/40
30/30 - 12s - monet_gen_loss: 3.3687 - photo_gen_loss: 3.0517 - monet_disc_loss: 0.6132 - photo_disc_loss: 0.6684
Epoch 5/40
30/30 - 12s - monet_gen_loss: 3.5114 - photo_gen_loss: 2.9045 - monet_disc_loss: 0.6288 - photo_disc_loss: 0.6473
Epoch 6/40
30/30 - 12s - monet_gen_loss: 3.0068 - photo_gen_loss: 2.7803 - monet_disc_loss: 0.6712 - photo_disc_loss: 0.6469
Epoch 7/40
30/30 - 12s - monet_gen_loss: 3.5435 - photo_gen_loss: 3.0683 - monet_disc_loss: 0.5127 - photo_disc_loss: 0.6262
Epoch 8/40
30/30 - 12s - monet_gen_loss: 2.7614 - photo_gen_loss: 2.7735 - monet_disc_loss: 0.6545 - photo_disc_loss: 0.6372
Epoch 9/40
30/30 - 12s - monet_gen_loss: 3.0355 - photo_gen_loss: 2.6660 - monet_disc_loss: 0.4845 - photo_disc_loss: 0.6585
Epoch 10/40
30/30 - 12s - monet_gen_loss: 3.6220 - photo_gen_loss: 3.1978 - monet_disc_loss: 0.4996 - photo_disc_loss: 0.6169
Epoch 11/40
30/30 - 12s - monet_gen_loss: 2.8367 - photo_gen_loss: 2.6351 - monet_disc_loss: 0.7118 - photo_disc_loss: 0.6369
Epoch 12/40
30/30 - 12s - monet_gen_loss: 2.9212 - photo_gen_loss: 2.4975 - monet_disc_loss: 0.6706 - photo_disc_loss: 0.6133
Epoch 13/40
30/30 - 12s - monet_gen_loss: 2.7333 - photo_gen_loss: 2.4768 - monet_disc_loss: 0.6357 - photo_disc_loss: 0.5805
Epoch 14/40
30/30 - 12s - monet_gen_loss: 2.6923 - photo_gen_loss: 2.2797 - monet_disc_loss: 0.5942 - photo_disc_loss: 0.6433
Epoch 15/40
30/30 - 12s - monet_gen_loss: 2.6800 - photo_gen_loss: 2.3027 - monet_disc_loss: 0.5680 - photo_disc_loss: 0.5492
Epoch 16/40
30/30 - 12s - monet_gen_loss: 2.4687 - photo_gen_loss: 2.2622 - monet_disc_loss: 0.4972 - photo_disc_loss: 0.5643
Epoch 17/40
30/30 - 12s - monet_gen_loss: 2.6511 - photo_gen_loss: 2.4242 - monet_disc_loss: 0.6424 - photo_disc_loss: 0.5433
Epoch 18/40
30/30 - 12s - monet_gen_loss: 2.6250 - photo_gen_loss: 2.4111 - monet_disc_loss: 0.5430 - photo_disc_loss: 0.5057
Epoch 19/40
30/30 - 12s - monet_gen_loss: 2.6670 - photo_gen_loss: 2.3912 - monet_disc_loss: 0.5175 - photo_disc_loss: 0.5799
Epoch 20/40
30/30 - 12s - monet_gen_loss: 2.4601 - photo_gen_loss: 2.2331 - monet_disc_loss: 0.6611 - photo_disc_loss: 0.5898
Epoch 21/40
30/30 - 12s - monet_gen_loss: 2.6985 - photo_gen_loss: 2.2634 - monet_disc_loss: 0.4954 - photo_disc_loss: 0.5056
Epoch 22/40
30/30 - 12s - monet_gen_loss: 2.6048 - photo_gen_loss: 2.2794 - monet_disc_loss: 0.5960 - photo_disc_loss: 0.4929
Epoch 23/40
30/30 - 12s - monet_gen_loss: 2.5756 - photo_gen_loss: 2.2293 - monet_disc_loss: 0.4458 - photo_disc_loss: 0.5316
Epoch 24/40
30/30 - 12s - monet_gen_loss: 2.4966 - photo_gen_loss: 2.1695 - monet_disc_loss: 0.7215 - photo_disc_loss: 0.6127
Epoch 25/40
30/30 - 12s - monet_gen_loss: 2.4521 - photo_gen_loss: 2.2148 - monet_disc_loss: 0.6475 - photo_disc_loss: 0.5186
Epoch 26/40
30/30 - 12s - monet_gen_loss: 2.5898 - photo_gen_loss: 2.5094 - monet_disc_loss: 0.6118 - photo_disc_loss: 0.4357
Epoch 27/40
30/30 - 12s - monet_gen_loss: 2.4629 - photo_gen_loss: 2.3138 - monet_disc_loss: 0.5766 - photo_disc_loss: 0.4973
Epoch 28/40
30/30 - 12s - monet_gen_loss: 2.5601 - photo_gen_loss: 2.3552 - monet_disc_loss: 0.5605 - photo_disc_loss: 0.4953
Epoch 29/40
30/30 - 12s - monet_gen_loss: 2.5462 - photo_gen_loss: 2.3141 - monet_disc_loss: 0.5594 - photo_disc_loss: 0.5284
Epoch 30/40
30/30 - 12s - monet_gen_loss: 2.6758 - photo_gen_loss: 2.3794 - monet_disc_loss: 0.5579 - photo_disc_loss: 0.5441
Epoch 31/40
30/30 - 12s - monet_gen_loss: 2.5304 - photo_gen_loss: 2.3340 - monet_disc_loss: 0.5466 - photo_disc_loss: 0.5333
Epoch 32/40
30/30 - 12s - monet_gen_loss: 2.6963 - photo_gen_loss: 2.3732 - monet_disc_loss: 0.5290 - photo_disc_loss: 0.5095
Epoch 33/40
30/30 - 12s - monet_gen_loss: 2.4645 - photo_gen_loss: 2.5061 - monet_disc_loss: 0.5262 - photo_disc_loss: 0.3853
Epoch 34/40
30/30 - 12s - monet_gen_loss: 2.7450 - photo_gen_loss: 2.2151 - monet_disc_loss: 0.5070 - photo_disc_loss: 0.5724
Epoch 35/40
30/30 - 12s - monet_gen_loss: 2.4855 - photo_gen_loss: 2.3618 - monet_disc_loss: 0.5901 - photo_disc_loss: 0.5203
Epoch 36/40
30/30 - 12s - monet_gen_loss: 2.6043 - photo_gen_loss: 2.3857 - monet_disc_loss: 0.5189 - photo_disc_loss: 0.5244
Epoch 37/40
30/30 - 12s - monet_gen_loss: 2.7309 - photo_gen_loss: 2.1973 - monet_disc_loss: 0.5159 - photo_disc_loss: 0.6037
Epoch 38/40
30/30 - 12s - monet_gen_loss: 2.4817 - photo_gen_loss: 2.3855 - monet_disc_loss: 0.6943 - photo_disc_loss: 0.5185
Epoch 39/40
30/30 - 12s - monet_gen_loss: 2.8610 - photo_gen_loss: 2.4673 - monet_disc_loss: 0.5180 - photo_disc_loss: 0.4894
Epoch 40/40
30/30 - 12s - monet_gen_loss: 2.5357 - photo_gen_loss: 2.4546 - monet_disc_loss: 0.5345 - photo_disc_loss: 0.4957
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "2.5356746",
    "photo_gen_loss": "2.4545534",
    "monet_disc_loss": "0.53454167",
    "photo_disc_loss": "0.49571583"
}
*** Show trained model predictions sample ***
choose_30_images_methods loop (curr_method = 'closest_images_by_pixel_distance'):  29%|██▊       | 2/7 [27:01<1:07:28, 809.79s/it] 
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 10:35:07.646219: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1


*** running_on_tpu - False ***


Epoch 1/40
30/30 - 19s - monet_gen_loss: 11.7324 - photo_gen_loss: 16.0208 - monet_disc_loss: 0.6124 - photo_disc_loss: 0.1802
Epoch 2/40
30/30 - 12s - monet_gen_loss: 11.6406 - photo_gen_loss: 19.3454 - monet_disc_loss: 0.5037 - photo_disc_loss: 0.0233
Epoch 3/40
30/30 - 12s - monet_gen_loss: 11.8868 - photo_gen_loss: 19.8025 - monet_disc_loss: 0.6504 - photo_disc_loss: 0.0961
Epoch 4/40
30/30 - 12s - monet_gen_loss: 12.3364 - photo_gen_loss: 21.6842 - monet_disc_loss: 0.6132 - photo_disc_loss: 0.0110
Epoch 5/40
30/30 - 12s - monet_gen_loss: 12.0152 - photo_gen_loss: 21.1887 - monet_disc_loss: 0.6451 - photo_disc_loss: 0.1739
Epoch 6/40
30/30 - 12s - monet_gen_loss: 11.9488 - photo_gen_loss: 18.5762 - monet_disc_loss: 0.6598 - photo_disc_loss: 0.1041
Epoch 7/40
30/30 - 12s - monet_gen_loss: 12.1539 - photo_gen_loss: 17.8359 - monet_disc_loss: 0.6808 - photo_disc_loss: 0.0910
Epoch 8/40
30/30 - 12s - monet_gen_loss: 11.9292 - photo_gen_loss: 16.4150 - monet_disc_loss: 0.6627 - photo_disc_loss: 0.1154
Epoch 9/40
30/30 - 12s - monet_gen_loss: 11.7463 - photo_gen_loss: 16.1379 - monet_disc_loss: 0.6670 - photo_disc_loss: 0.1395
Epoch 10/40
30/30 - 12s - monet_gen_loss: 12.1239 - photo_gen_loss: 47.5131 - monet_disc_loss: 0.6334 - photo_disc_loss: 0.0456
Epoch 11/40
30/30 - 12s - monet_gen_loss: 11.8386 - photo_gen_loss: 678.6506 - monet_disc_loss: 0.7525 - photo_disc_loss: 1.1373e-04
Epoch 12/40
30/30 - 12s - monet_gen_loss: 11.8327 - photo_gen_loss: 482.8421 - monet_disc_loss: 0.6266 - photo_disc_loss: 0.0000e+00
Epoch 13/40
30/30 - 12s - monet_gen_loss: 12.0350 - photo_gen_loss: 269.9286 - monet_disc_loss: 0.5228 - photo_disc_loss: 2.8972e-18
Epoch 14/40
30/30 - 12s - monet_gen_loss: 11.9781 - photo_gen_loss: 263.7853 - monet_disc_loss: 0.6659 - photo_disc_loss: 1.0845e-17
Epoch 15/40
30/30 - 12s - monet_gen_loss: 11.5539 - photo_gen_loss: 262.5569 - monet_disc_loss: 0.6964 - photo_disc_loss: 1.2079e-17
Epoch 16/40
30/30 - 12s - monet_gen_loss: 11.7371 - photo_gen_loss: 262.8848 - monet_disc_loss: 0.6553 - photo_disc_loss: 9.9435e-18
Epoch 17/40
30/30 - 12s - monet_gen_loss: 11.7902 - photo_gen_loss: 262.8795 - monet_disc_loss: 0.6572 - photo_disc_loss: 1.1141e-17
Epoch 18/40
30/30 - 12s - monet_gen_loss: 11.8024 - photo_gen_loss: 239.5985 - monet_disc_loss: 0.6437 - photo_disc_loss: 1.6445e-14
Epoch 19/40
30/30 - 12s - monet_gen_loss: 11.9889 - photo_gen_loss: 239.7152 - monet_disc_loss: 0.5396 - photo_disc_loss: 1.9118e-14
Epoch 20/40
30/30 - 12s - monet_gen_loss: 12.1505 - photo_gen_loss: 239.4394 - monet_disc_loss: 0.6268 - photo_disc_loss: 2.2768e-14
Epoch 21/40
30/30 - 12s - monet_gen_loss: 11.7608 - photo_gen_loss: 239.5597 - monet_disc_loss: 0.6948 - photo_disc_loss: 2.2523e-14
Epoch 22/40
30/30 - 12s - monet_gen_loss: 11.7539 - photo_gen_loss: 239.3948 - monet_disc_loss: 0.6914 - photo_disc_loss: 2.2728e-14
Epoch 23/40
30/30 - 12s - monet_gen_loss: 11.7995 - photo_gen_loss: 239.4379 - monet_disc_loss: 0.6991 - photo_disc_loss: 1.7749e-14
Epoch 24/40
30/30 - 12s - monet_gen_loss: 11.7242 - photo_gen_loss: 239.2478 - monet_disc_loss: 0.5811 - photo_disc_loss: 2.0051e-14
Epoch 25/40
30/30 - 12s - monet_gen_loss: 11.7571 - photo_gen_loss: 239.3789 - monet_disc_loss: 0.6949 - photo_disc_loss: 2.1500e-14
Epoch 26/40
30/30 - 12s - monet_gen_loss: 11.8410 - photo_gen_loss: 239.4239 - monet_disc_loss: 0.6247 - photo_disc_loss: 1.9544e-14
Epoch 27/40
30/30 - 12s - monet_gen_loss: 11.7944 - photo_gen_loss: 239.4363 - monet_disc_loss: 0.7205 - photo_disc_loss: 2.1901e-14
Epoch 28/40
30/30 - 12s - monet_gen_loss: 11.7350 - photo_gen_loss: 239.3528 - monet_disc_loss: 0.6691 - photo_disc_loss: 1.8415e-14
Epoch 29/40
30/30 - 12s - monet_gen_loss: 12.4086 - photo_gen_loss: 239.9542 - monet_disc_loss: 0.6248 - photo_disc_loss: 1.9307e-14
Epoch 30/40
30/30 - 12s - monet_gen_loss: 11.9885 - photo_gen_loss: 239.5469 - monet_disc_loss: 0.5741 - photo_disc_loss: 2.2954e-14
Epoch 31/40
30/30 - 12s - monet_gen_loss: 11.6274 - photo_gen_loss: 239.4102 - monet_disc_loss: 0.6921 - photo_disc_loss: 2.0094e-14
Epoch 32/40
30/30 - 12s - monet_gen_loss: 11.8509 - photo_gen_loss: 239.5338 - monet_disc_loss: 0.6215 - photo_disc_loss: 2.1074e-14
Epoch 33/40
30/30 - 12s - monet_gen_loss: 11.5850 - photo_gen_loss: 239.3910 - monet_disc_loss: 0.6789 - photo_disc_loss: 1.9613e-14
Epoch 34/40
30/30 - 12s - monet_gen_loss: 11.8489 - photo_gen_loss: 239.4683 - monet_disc_loss: 0.6747 - photo_disc_loss: 1.8879e-14
Epoch 35/40
30/30 - 12s - monet_gen_loss: 11.7444 - photo_gen_loss: 239.3445 - monet_disc_loss: 0.7020 - photo_disc_loss: 2.4536e-14
Epoch 36/40
30/30 - 12s - monet_gen_loss: 11.7916 - photo_gen_loss: 239.4478 - monet_disc_loss: 0.6995 - photo_disc_loss: 2.1945e-14
Epoch 37/40
30/30 - 12s - monet_gen_loss: 12.4418 - photo_gen_loss: 240.0525 - monet_disc_loss: 0.6552 - photo_disc_loss: 2.1482e-14
Epoch 38/40
30/30 - 12s - monet_gen_loss: 12.3874 - photo_gen_loss: 239.9925 - monet_disc_loss: 0.6209 - photo_disc_loss: 1.9753e-14
Epoch 39/40
30/30 - 12s - monet_gen_loss: 12.1583 - photo_gen_loss: 239.7286 - monet_disc_loss: 0.6849 - photo_disc_loss: 1.9469e-14
Epoch 40/40
30/30 - 12s - monet_gen_loss: 12.2301 - photo_gen_loss: 239.7898 - monet_disc_loss: 0.6661 - photo_disc_loss: 1.7409e-14
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "12.230062",
    "photo_gen_loss": "239.78984",
    "monet_disc_loss": "0.6660673",
    "photo_disc_loss": "1.7408894e-14"
}
*** Show trained model predictions sample ***
choose_30_images_methods loop (curr_method = 'farthest_images_by_structural_distance'):  43%|████▎     | 3/7 [40:43<54:22, 815.74s/it]
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 10:48:50.340213: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1


*** running_on_tpu - False ***


Epoch 1/40
30/30 - 19s - monet_gen_loss: 12.8621 - photo_gen_loss: 18.9080 - monet_disc_loss: 0.6567 - photo_disc_loss: 0.1373
Epoch 2/40
30/30 - 12s - monet_gen_loss: 12.7678 - photo_gen_loss: 16.6898 - monet_disc_loss: 0.5414 - photo_disc_loss: 0.1990
Epoch 3/40
30/30 - 12s - monet_gen_loss: 12.8024 - photo_gen_loss: 19.4358 - monet_disc_loss: 0.5067 - photo_disc_loss: 0.1215
Epoch 4/40
30/30 - 12s - monet_gen_loss: 12.7053 - photo_gen_loss: 18.2124 - monet_disc_loss: 0.6028 - photo_disc_loss: 0.1090
Epoch 5/40
30/30 - 12s - monet_gen_loss: 12.6745 - photo_gen_loss: 17.7417 - monet_disc_loss: 0.6123 - photo_disc_loss: 0.1319
Epoch 6/40
30/30 - 12s - monet_gen_loss: 12.7808 - photo_gen_loss: 20.5141 - monet_disc_loss: 0.6691 - photo_disc_loss: 0.1830
Epoch 7/40
30/30 - 12s - monet_gen_loss: 12.6841 - photo_gen_loss: 18.3286 - monet_disc_loss: 0.7969 - photo_disc_loss: 0.1106
Epoch 8/40
30/30 - 12s - monet_gen_loss: 12.6890 - photo_gen_loss: 17.4469 - monet_disc_loss: 0.7367 - photo_disc_loss: 0.1288
Epoch 9/40
30/30 - 12s - monet_gen_loss: 13.2923 - photo_gen_loss: 19.8859 - monet_disc_loss: 0.7046 - photo_disc_loss: 0.1278
Epoch 10/40
30/30 - 12s - monet_gen_loss: 12.5287 - photo_gen_loss: 20.0143 - monet_disc_loss: 0.6283 - photo_disc_loss: 0.1569
Epoch 11/40
30/30 - 12s - monet_gen_loss: 12.5142 - photo_gen_loss: 18.0855 - monet_disc_loss: 0.6602 - photo_disc_loss: 0.1055
Epoch 12/40
30/30 - 12s - monet_gen_loss: 12.6004 - photo_gen_loss: 18.4771 - monet_disc_loss: 0.7410 - photo_disc_loss: 0.0928
Epoch 13/40
30/30 - 12s - monet_gen_loss: 12.4794 - photo_gen_loss: 18.0350 - monet_disc_loss: 0.6820 - photo_disc_loss: 0.0910
Epoch 14/40
30/30 - 12s - monet_gen_loss: 12.4603 - photo_gen_loss: 18.5725 - monet_disc_loss: 0.6381 - photo_disc_loss: 0.1140
Epoch 15/40
30/30 - 12s - monet_gen_loss: 12.9816 - photo_gen_loss: 21.7840 - monet_disc_loss: 0.7680 - photo_disc_loss: 0.1944
Epoch 16/40
30/30 - 12s - monet_gen_loss: 12.4787 - photo_gen_loss: 18.5795 - monet_disc_loss: 0.6873 - photo_disc_loss: 0.1278
Epoch 17/40
30/30 - 12s - monet_gen_loss: 12.4684 - photo_gen_loss: 17.7501 - monet_disc_loss: 0.6842 - photo_disc_loss: 0.1082
Epoch 18/40
30/30 - 12s - monet_gen_loss: 12.7550 - photo_gen_loss: 17.9400 - monet_disc_loss: 0.6742 - photo_disc_loss: 0.1052
Epoch 19/40
30/30 - 12s - monet_gen_loss: 12.2377 - photo_gen_loss: 18.2094 - monet_disc_loss: 0.6829 - photo_disc_loss: 0.1018
Epoch 20/40
30/30 - 12s - monet_gen_loss: 12.2871 - photo_gen_loss: 18.6520 - monet_disc_loss: 0.6939 - photo_disc_loss: 0.1022
Epoch 21/40
30/30 - 12s - monet_gen_loss: 12.2630 - photo_gen_loss: 18.3685 - monet_disc_loss: 0.6821 - photo_disc_loss: 0.0988
Epoch 22/40
30/30 - 12s - monet_gen_loss: 12.7224 - photo_gen_loss: 18.0258 - monet_disc_loss: 0.6476 - photo_disc_loss: 0.0841
Epoch 23/40
30/30 - 12s - monet_gen_loss: 12.2766 - photo_gen_loss: 18.2194 - monet_disc_loss: 0.7053 - photo_disc_loss: 0.0723
Epoch 24/40
30/30 - 12s - monet_gen_loss: 12.7398 - photo_gen_loss: 18.6729 - monet_disc_loss: 0.8013 - photo_disc_loss: 0.0361
Epoch 25/40
30/30 - 12s - monet_gen_loss: 12.1432 - photo_gen_loss: 18.3452 - monet_disc_loss: 0.6537 - photo_disc_loss: 0.0659
Epoch 26/40
30/30 - 12s - monet_gen_loss: 12.2868 - photo_gen_loss: 18.5148 - monet_disc_loss: 0.6845 - photo_disc_loss: 0.0380
Epoch 27/40
30/30 - 12s - monet_gen_loss: 12.1234 - photo_gen_loss: 19.2056 - monet_disc_loss: 0.6390 - photo_disc_loss: 0.0156
Epoch 28/40
30/30 - 12s - monet_gen_loss: 12.1625 - photo_gen_loss: 19.6709 - monet_disc_loss: 0.6955 - photo_disc_loss: 0.0139
Epoch 29/40
30/30 - 12s - monet_gen_loss: 12.4627 - photo_gen_loss: 21.2310 - monet_disc_loss: 0.6796 - photo_disc_loss: 0.0676
Epoch 30/40
30/30 - 12s - monet_gen_loss: 12.1982 - photo_gen_loss: 19.0833 - monet_disc_loss: 0.6940 - photo_disc_loss: 0.0302
Epoch 31/40
30/30 - 12s - monet_gen_loss: 12.6077 - photo_gen_loss: 20.9613 - monet_disc_loss: 0.6649 - photo_disc_loss: 0.0750
Epoch 32/40
30/30 - 12s - monet_gen_loss: 12.5733 - photo_gen_loss: 17.9283 - monet_disc_loss: 0.6600 - photo_disc_loss: 0.0623
Epoch 33/40
30/30 - 12s - monet_gen_loss: 12.1878 - photo_gen_loss: 19.8256 - monet_disc_loss: 0.7349 - photo_disc_loss: 0.0393
Epoch 34/40
30/30 - 12s - monet_gen_loss: 12.1906 - photo_gen_loss: 18.1013 - monet_disc_loss: 0.6886 - photo_disc_loss: 0.0428
Epoch 35/40
30/30 - 12s - monet_gen_loss: 12.2117 - photo_gen_loss: 18.3803 - monet_disc_loss: 0.6487 - photo_disc_loss: 0.0382
Epoch 36/40
30/30 - 12s - monet_gen_loss: 12.3716 - photo_gen_loss: 18.3403 - monet_disc_loss: 0.6945 - photo_disc_loss: 0.0362
Epoch 37/40
30/30 - 12s - monet_gen_loss: 12.2900 - photo_gen_loss: 17.7114 - monet_disc_loss: 0.7086 - photo_disc_loss: 0.0432
Epoch 38/40
30/30 - 12s - monet_gen_loss: 12.3654 - photo_gen_loss: 21.3931 - monet_disc_loss: 0.6287 - photo_disc_loss: 0.1109
Epoch 39/40
30/30 - 12s - monet_gen_loss: 12.5356 - photo_gen_loss: 24.5154 - monet_disc_loss: 0.6595 - photo_disc_loss: 0.2047
Epoch 40/40
30/30 - 12s - monet_gen_loss: 12.4511 - photo_gen_loss: 20.9830 - monet_disc_loss: 0.6918 - photo_disc_loss: 0.0968
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "12.451134",
    "photo_gen_loss": "20.982998",
    "monet_disc_loss": "0.6918422",
    "photo_disc_loss": "0.096842155"
}
*** Show trained model predictions sample ***
choose_30_images_methods loop (curr_method = 'closest_images_by_structural_distance'):  57%|█████▋    | 4/7 [54:10<40:36, 812.04s/it] 
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 11:02:16.564175: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1


*** running_on_tpu - False ***


Epoch 1/40
30/30 - 19s - monet_gen_loss: 15.9486 - photo_gen_loss: 18.8947 - monet_disc_loss: 0.3569 - photo_disc_loss: 0.0929
Epoch 2/40
30/30 - 12s - monet_gen_loss: 14.4265 - photo_gen_loss: 16.6273 - monet_disc_loss: 0.6101 - photo_disc_loss: 0.3224
Epoch 3/40
30/30 - 12s - monet_gen_loss: 15.9128 - photo_gen_loss: 20.2557 - monet_disc_loss: 0.4543 - photo_disc_loss: 0.0503
Epoch 4/40
30/30 - 12s - monet_gen_loss: 18.6909 - photo_gen_loss: 19.6288 - monet_disc_loss: 0.5488 - photo_disc_loss: 0.2851
Epoch 5/40
30/30 - 12s - monet_gen_loss: 15.4178 - photo_gen_loss: 21.3885 - monet_disc_loss: 0.3068 - photo_disc_loss: 0.1255
Epoch 6/40
30/30 - 12s - monet_gen_loss: 15.1000 - photo_gen_loss: 21.2846 - monet_disc_loss: 0.5497 - photo_disc_loss: 0.0178
Epoch 7/40
30/30 - 12s - monet_gen_loss: 19.6554 - photo_gen_loss: 33.7426 - monet_disc_loss: 0.6839 - photo_disc_loss: 0.3210
Epoch 8/40
30/30 - 12s - monet_gen_loss: 18.1894 - photo_gen_loss: 22.9719 - monet_disc_loss: 0.7040 - photo_disc_loss: 0.0511
Epoch 9/40
30/30 - 12s - monet_gen_loss: 16.2397 - photo_gen_loss: 21.3332 - monet_disc_loss: 0.6635 - photo_disc_loss: 0.0583
Epoch 10/40
30/30 - 12s - monet_gen_loss: 15.4667 - photo_gen_loss: 25.0523 - monet_disc_loss: 0.5324 - photo_disc_loss: 0.0097
Epoch 11/40
30/30 - 12s - monet_gen_loss: 14.2019 - photo_gen_loss: 20.4687 - monet_disc_loss: 0.4974 - photo_disc_loss: 0.1210
Epoch 12/40
30/30 - 12s - monet_gen_loss: 15.1975 - photo_gen_loss: 25.7844 - monet_disc_loss: 0.6130 - photo_disc_loss: 0.1913
Epoch 13/40
30/30 - 12s - monet_gen_loss: 14.2626 - photo_gen_loss: 21.3968 - monet_disc_loss: 0.5344 - photo_disc_loss: 0.1215
Epoch 14/40
30/30 - 12s - monet_gen_loss: 14.6701 - photo_gen_loss: 19.6534 - monet_disc_loss: 0.7098 - photo_disc_loss: 0.0792
Epoch 15/40
30/30 - 12s - monet_gen_loss: 14.7684 - photo_gen_loss: 21.2659 - monet_disc_loss: 0.5396 - photo_disc_loss: 0.1183
Epoch 16/40
30/30 - 12s - monet_gen_loss: 14.1352 - photo_gen_loss: 21.1311 - monet_disc_loss: 0.6947 - photo_disc_loss: 0.1286
Epoch 17/40
30/30 - 12s - monet_gen_loss: 14.9113 - photo_gen_loss: 19.1099 - monet_disc_loss: 0.3190 - photo_disc_loss: 0.0963
Epoch 18/40
30/30 - 12s - monet_gen_loss: 15.4298 - photo_gen_loss: 20.2782 - monet_disc_loss: 1.5055 - photo_disc_loss: 0.0940
Epoch 19/40
30/30 - 12s - monet_gen_loss: 14.2855 - photo_gen_loss: 19.5207 - monet_disc_loss: 0.6743 - photo_disc_loss: 0.0783
Epoch 20/40
30/30 - 12s - monet_gen_loss: 14.9430 - photo_gen_loss: 21.1849 - monet_disc_loss: 0.4436 - photo_disc_loss: 0.0782
Epoch 21/40
30/30 - 12s - monet_gen_loss: 15.1501 - photo_gen_loss: 20.9840 - monet_disc_loss: 0.6593 - photo_disc_loss: 0.0662
Epoch 22/40
30/30 - 12s - monet_gen_loss: 15.7359 - photo_gen_loss: 20.6698 - monet_disc_loss: 0.6498 - photo_disc_loss: 0.0608
Epoch 23/40
30/30 - 12s - monet_gen_loss: 14.7862 - photo_gen_loss: 22.3758 - monet_disc_loss: 0.6325 - photo_disc_loss: 0.1248
Epoch 24/40
30/30 - 12s - monet_gen_loss: 14.5322 - photo_gen_loss: 21.2987 - monet_disc_loss: 0.6447 - photo_disc_loss: 0.1145
Epoch 25/40
30/30 - 12s - monet_gen_loss: 14.5001 - photo_gen_loss: 20.8213 - monet_disc_loss: 0.7024 - photo_disc_loss: 0.1040
Epoch 26/40
30/30 - 12s - monet_gen_loss: 15.8646 - photo_gen_loss: 20.1190 - monet_disc_loss: 0.6641 - photo_disc_loss: 0.0855
Epoch 27/40
30/30 - 12s - monet_gen_loss: 14.2169 - photo_gen_loss: 21.5684 - monet_disc_loss: 0.6975 - photo_disc_loss: 0.0964
Epoch 28/40
30/30 - 12s - monet_gen_loss: 14.7405 - photo_gen_loss: 19.2721 - monet_disc_loss: 0.6561 - photo_disc_loss: 0.0968
Epoch 29/40
30/30 - 12s - monet_gen_loss: 14.8280 - photo_gen_loss: 19.9939 - monet_disc_loss: 0.3899 - photo_disc_loss: 0.0573
Epoch 30/40
30/30 - 12s - monet_gen_loss: 14.3380 - photo_gen_loss: 20.4548 - monet_disc_loss: 0.6587 - photo_disc_loss: 0.0207
Epoch 31/40
30/30 - 12s - monet_gen_loss: 14.3457 - photo_gen_loss: 23.8585 - monet_disc_loss: 0.4335 - photo_disc_loss: 0.0482
Epoch 32/40
30/30 - 12s - monet_gen_loss: 14.1481 - photo_gen_loss: 30.0218 - monet_disc_loss: 0.4877 - photo_disc_loss: 0.3270
Epoch 33/40
30/30 - 12s - monet_gen_loss: 15.2862 - photo_gen_loss: 22.9729 - monet_disc_loss: 0.7064 - photo_disc_loss: 0.0189
Epoch 34/40
30/30 - 12s - monet_gen_loss: 14.0854 - photo_gen_loss: 21.1372 - monet_disc_loss: 0.6905 - photo_disc_loss: 0.0109
Epoch 35/40
30/30 - 12s - monet_gen_loss: 14.5422 - photo_gen_loss: 21.2787 - monet_disc_loss: 0.7148 - photo_disc_loss: 0.0092
Epoch 36/40
30/30 - 12s - monet_gen_loss: 14.2208 - photo_gen_loss: 21.3747 - monet_disc_loss: 0.6669 - photo_disc_loss: 0.0082
Epoch 37/40
30/30 - 12s - monet_gen_loss: 13.9638 - photo_gen_loss: 20.5721 - monet_disc_loss: 0.5902 - photo_disc_loss: 0.0087
Epoch 38/40
30/30 - 12s - monet_gen_loss: 14.0633 - photo_gen_loss: 22.1291 - monet_disc_loss: 0.6418 - photo_disc_loss: 0.0404
Epoch 39/40
30/30 - 12s - monet_gen_loss: 14.0494 - photo_gen_loss: 21.6886 - monet_disc_loss: 0.6580 - photo_disc_loss: 0.0057
Epoch 40/40
30/30 - 12s - monet_gen_loss: 14.2253 - photo_gen_loss: 23.4786 - monet_disc_loss: 0.7047 - photo_disc_loss: 0.0012
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "14.225349",
    "photo_gen_loss": "23.478567",
    "monet_disc_loss": "0.70471066",
    "photo_disc_loss": "0.0011804226"
}
*** Show trained model predictions sample ***
choose_30_images_methods loop (curr_method = 'farthest_images_by_earth_movers_distance'):  71%|███████▏  | 5/7 [1:07:45<27:06, 813.14s/it]
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 11:15:51.643182: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1


*** running_on_tpu - False ***


Epoch 1/40
30/30 - 20s - monet_gen_loss: 3.6819 - photo_gen_loss: 3.9068 - monet_disc_loss: 0.7391 - photo_disc_loss: 0.6583
Epoch 2/40
30/30 - 12s - monet_gen_loss: 3.5926 - photo_gen_loss: 3.8720 - monet_disc_loss: 0.6318 - photo_disc_loss: 0.6825
Epoch 3/40
30/30 - 12s - monet_gen_loss: 3.4384 - photo_gen_loss: 3.5290 - monet_disc_loss: 0.8795 - photo_disc_loss: 0.8475
Epoch 4/40
30/30 - 12s - monet_gen_loss: 2.7922 - photo_gen_loss: 2.8611 - monet_disc_loss: 0.6176 - photo_disc_loss: 0.6293
Epoch 5/40
30/30 - 12s - monet_gen_loss: 3.0111 - photo_gen_loss: 3.4307 - monet_disc_loss: 0.6998 - photo_disc_loss: 0.5995
Epoch 6/40
30/30 - 12s - monet_gen_loss: 2.8158 - photo_gen_loss: 3.0268 - monet_disc_loss: 0.6106 - photo_disc_loss: 0.6786
Epoch 7/40
30/30 - 12s - monet_gen_loss: 2.9992 - photo_gen_loss: 3.2774 - monet_disc_loss: 0.6715 - photo_disc_loss: 0.6985
Epoch 8/40
30/30 - 12s - monet_gen_loss: 2.7166 - photo_gen_loss: 2.9801 - monet_disc_loss: 0.7111 - photo_disc_loss: 0.6950
Epoch 9/40
30/30 - 12s - monet_gen_loss: 2.7203 - photo_gen_loss: 2.9351 - monet_disc_loss: 0.6702 - photo_disc_loss: 0.6917
Epoch 10/40
30/30 - 12s - monet_gen_loss: 2.7035 - photo_gen_loss: 3.0471 - monet_disc_loss: 0.6449 - photo_disc_loss: 0.6764
Epoch 11/40
30/30 - 12s - monet_gen_loss: 2.8504 - photo_gen_loss: 3.3838 - monet_disc_loss: 0.6693 - photo_disc_loss: 0.6741
Epoch 12/40
30/30 - 12s - monet_gen_loss: 2.5441 - photo_gen_loss: 2.7827 - monet_disc_loss: 0.6675 - photo_disc_loss: 0.6930
Epoch 13/40
30/30 - 12s - monet_gen_loss: 2.7564 - photo_gen_loss: 3.0031 - monet_disc_loss: 0.6230 - photo_disc_loss: 0.6945
Epoch 14/40
30/30 - 12s - monet_gen_loss: 3.0942 - photo_gen_loss: 3.3792 - monet_disc_loss: 0.6500 - photo_disc_loss: 0.6949
Epoch 15/40
30/30 - 12s - monet_gen_loss: 2.2907 - photo_gen_loss: 2.6865 - monet_disc_loss: 0.6895 - photo_disc_loss: 0.5974
Epoch 16/40
30/30 - 12s - monet_gen_loss: 2.5279 - photo_gen_loss: 2.8251 - monet_disc_loss: 0.6503 - photo_disc_loss: 0.6927
Epoch 17/40
30/30 - 12s - monet_gen_loss: 2.3032 - photo_gen_loss: 2.6565 - monet_disc_loss: 0.6751 - photo_disc_loss: 0.6752
Epoch 18/40
30/30 - 12s - monet_gen_loss: 2.6247 - photo_gen_loss: 2.9697 - monet_disc_loss: 0.6530 - photo_disc_loss: 0.6916
Epoch 19/40
30/30 - 12s - monet_gen_loss: 2.4320 - photo_gen_loss: 2.7333 - monet_disc_loss: 0.6024 - photo_disc_loss: 0.6762
Epoch 20/40
30/30 - 12s - monet_gen_loss: 2.3861 - photo_gen_loss: 2.3776 - monet_disc_loss: 0.5546 - photo_disc_loss: 0.6924
Epoch 21/40
30/30 - 12s - monet_gen_loss: 2.2335 - photo_gen_loss: 2.7330 - monet_disc_loss: 0.6266 - photo_disc_loss: 0.5402
Epoch 22/40
30/30 - 12s - monet_gen_loss: 2.2598 - photo_gen_loss: 2.5563 - monet_disc_loss: 0.6616 - photo_disc_loss: 0.6929
Epoch 23/40
30/30 - 12s - monet_gen_loss: 2.1933 - photo_gen_loss: 2.5707 - monet_disc_loss: 0.6389 - photo_disc_loss: 0.6285
Epoch 24/40
30/30 - 12s - monet_gen_loss: 2.2190 - photo_gen_loss: 2.5442 - monet_disc_loss: 0.6696 - photo_disc_loss: 0.6972
Epoch 25/40
30/30 - 12s - monet_gen_loss: 2.1848 - photo_gen_loss: 2.6856 - monet_disc_loss: 0.7242 - photo_disc_loss: 0.6449
Epoch 26/40
30/30 - 12s - monet_gen_loss: 2.1029 - photo_gen_loss: 2.3081 - monet_disc_loss: 0.6969 - photo_disc_loss: 0.6979
Epoch 27/40
30/30 - 12s - monet_gen_loss: 1.9887 - photo_gen_loss: 2.2075 - monet_disc_loss: 0.6517 - photo_disc_loss: 0.6783
Epoch 28/40
30/30 - 12s - monet_gen_loss: 2.0954 - photo_gen_loss: 2.4239 - monet_disc_loss: 0.6879 - photo_disc_loss: 0.6885
Epoch 29/40
30/30 - 12s - monet_gen_loss: 2.0656 - photo_gen_loss: 2.2929 - monet_disc_loss: 0.6748 - photo_disc_loss: 0.6574
Epoch 30/40
30/30 - 12s - monet_gen_loss: 2.1618 - photo_gen_loss: 2.3640 - monet_disc_loss: 0.6832 - photo_disc_loss: 0.6999
Epoch 31/40
30/30 - 12s - monet_gen_loss: 2.0235 - photo_gen_loss: 2.2630 - monet_disc_loss: 0.6471 - photo_disc_loss: 0.6467
Epoch 32/40
30/30 - 12s - monet_gen_loss: 2.1656 - photo_gen_loss: 2.5106 - monet_disc_loss: 0.6883 - photo_disc_loss: 0.6931
Epoch 33/40
30/30 - 12s - monet_gen_loss: 2.2673 - photo_gen_loss: 2.2728 - monet_disc_loss: 0.4713 - photo_disc_loss: 0.6106
Epoch 34/40
30/30 - 12s - monet_gen_loss: 2.1987 - photo_gen_loss: 2.5792 - monet_disc_loss: 0.6838 - photo_disc_loss: 0.6963
Epoch 35/40
30/30 - 12s - monet_gen_loss: 1.9373 - photo_gen_loss: 2.2331 - monet_disc_loss: 0.6824 - photo_disc_loss: 0.6737
Epoch 36/40
30/30 - 12s - monet_gen_loss: 1.9682 - photo_gen_loss: 2.1695 - monet_disc_loss: 0.6921 - photo_disc_loss: 0.6771
Epoch 37/40
30/30 - 12s - monet_gen_loss: 1.9670 - photo_gen_loss: 2.2151 - monet_disc_loss: 0.6956 - photo_disc_loss: 0.5377
Epoch 38/40
30/30 - 12s - monet_gen_loss: 1.9761 - photo_gen_loss: 2.1506 - monet_disc_loss: 0.6727 - photo_disc_loss: 0.6904
Epoch 39/40
30/30 - 12s - monet_gen_loss: 2.2104 - photo_gen_loss: 2.2272 - monet_disc_loss: 0.4939 - photo_disc_loss: 0.6106
Epoch 40/40
30/30 - 12s - monet_gen_loss: 1.9140 - photo_gen_loss: 2.2533 - monet_disc_loss: 0.6931 - photo_disc_loss: 0.6938
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "1.9139864",
    "photo_gen_loss": "2.2533166",
    "monet_disc_loss": "0.6930935",
    "photo_disc_loss": "0.6938362"
}
*** Show trained model predictions sample ***
choose_30_images_methods loop (curr_method = 'closest_images_by_earth_movers_distance'):  86%|████████▌ | 6/7 [1:21:03<13:28, 808.12s/it] 
set_training_random_seed() - seed value: 1
found 5 monet and 20 photo tfrec files.
set_training_random_seed() - seed value: 42
2023-02-15 11:29:10.272193: W tensorflow/core/data/root_dataset.cc:167] Optimization loop failed: Cancelled: Operation was cancelled
*** Selected 30 train monet photos (shape: (1, 320, 320, 3)) ***
set_training_random_seed() - seed value: 1


*** running_on_tpu - False ***


Epoch 1/40
30/30 - 19s - monet_gen_loss: 13.0015 - photo_gen_loss: 23.6025 - monet_disc_loss: 0.7319 - photo_disc_loss: 0.3188
Epoch 2/40
30/30 - 12s - monet_gen_loss: 12.1906 - photo_gen_loss: 16.9508 - monet_disc_loss: 0.5497 - photo_disc_loss: 0.1456
Epoch 3/40
30/30 - 12s - monet_gen_loss: 12.1366 - photo_gen_loss: 18.3463 - monet_disc_loss: 0.6968 - photo_disc_loss: 0.1489
Epoch 4/40
30/30 - 12s - monet_gen_loss: 12.1657 - photo_gen_loss: 18.0074 - monet_disc_loss: 0.7911 - photo_disc_loss: 0.1195
Epoch 5/40
30/30 - 12s - monet_gen_loss: 12.2070 - photo_gen_loss: 19.9578 - monet_disc_loss: 0.5071 - photo_disc_loss: 0.1708
Epoch 6/40
30/30 - 12s - monet_gen_loss: 11.9279 - photo_gen_loss: 17.1974 - monet_disc_loss: 0.5801 - photo_disc_loss: 0.0712
Epoch 7/40
30/30 - 12s - monet_gen_loss: 13.0880 - photo_gen_loss: 19.4724 - monet_disc_loss: 0.6613 - photo_disc_loss: 0.0502
Epoch 8/40
30/30 - 12s - monet_gen_loss: 12.0601 - photo_gen_loss: 29.0034 - monet_disc_loss: 0.5759 - photo_disc_loss: 0.6775
Epoch 9/40
30/30 - 12s - monet_gen_loss: 12.2745 - photo_gen_loss: 27.9616 - monet_disc_loss: 0.7287 - photo_disc_loss: 0.3157
Epoch 10/40
30/30 - 12s - monet_gen_loss: 12.2364 - photo_gen_loss: 20.0996 - monet_disc_loss: 0.6807 - photo_disc_loss: 0.1081
Epoch 11/40
30/30 - 12s - monet_gen_loss: 12.0916 - photo_gen_loss: 20.6588 - monet_disc_loss: 0.7027 - photo_disc_loss: 0.1144
Epoch 12/40
30/30 - 12s - monet_gen_loss: 11.8757 - photo_gen_loss: 14.2252 - monet_disc_loss: 0.6971 - photo_disc_loss: 33.4926
Epoch 13/40
30/30 - 12s - monet_gen_loss: 11.9626 - photo_gen_loss: 86.8205 - monet_disc_loss: 0.7000 - photo_disc_loss: 1.8250
Epoch 14/40
30/30 - 12s - monet_gen_loss: 11.8037 - photo_gen_loss: 14.1778 - monet_disc_loss: 0.6508 - photo_disc_loss: 73.3853
Epoch 15/40
30/30 - 12s - monet_gen_loss: 11.9149 - photo_gen_loss: 127.9150 - monet_disc_loss: 0.6376 - photo_disc_loss: 4.7042
Epoch 16/40
30/30 - 12s - monet_gen_loss: 11.9413 - photo_gen_loss: 1233.2212 - monet_disc_loss: 0.6499 - photo_disc_loss: 45.6540
Epoch 17/40
30/30 - 12s - monet_gen_loss: 11.9964 - photo_gen_loss: 76.0118 - monet_disc_loss: 0.6855 - photo_disc_loss: 1.4416
Epoch 18/40
30/30 - 12s - monet_gen_loss: 11.8635 - photo_gen_loss: 317.9837 - monet_disc_loss: 0.7102 - photo_disc_loss: 8.4590
Epoch 19/40
30/30 - 12s - monet_gen_loss: 12.0432 - photo_gen_loss: 145.6223 - monet_disc_loss: 0.6597 - photo_disc_loss: 2.7308
Epoch 20/40
30/30 - 12s - monet_gen_loss: 11.7487 - photo_gen_loss: 30.8400 - monet_disc_loss: 0.6382 - photo_disc_loss: 0.2359
Epoch 21/40
30/30 - 12s - monet_gen_loss: 11.9281 - photo_gen_loss: 26.5460 - monet_disc_loss: 1.1747 - photo_disc_loss: 0.3631
Epoch 22/40
30/30 - 12s - monet_gen_loss: 11.7702 - photo_gen_loss: 126.9469 - monet_disc_loss: 0.6146 - photo_disc_loss: 2.6145
Epoch 23/40
30/30 - 12s - monet_gen_loss: 11.7364 - photo_gen_loss: 60.1995 - monet_disc_loss: 0.6970 - photo_disc_loss: 0.8338
Epoch 24/40
30/30 - 12s - monet_gen_loss: 11.6041 - photo_gen_loss: 40.6890 - monet_disc_loss: 0.6682 - photo_disc_loss: 0.2977
Epoch 25/40
30/30 - 12s - monet_gen_loss: 11.9007 - photo_gen_loss: 33.6451 - monet_disc_loss: 0.8529 - photo_disc_loss: 0.1921
Epoch 26/40
30/30 - 12s - monet_gen_loss: 11.8171 - photo_gen_loss: 48.6337 - monet_disc_loss: 0.6938 - photo_disc_loss: 0.5204
Epoch 27/40
30/30 - 12s - monet_gen_loss: 11.7860 - photo_gen_loss: 20.1032 - monet_disc_loss: 0.6945 - photo_disc_loss: 0.1151
Epoch 28/40
30/30 - 12s - monet_gen_loss: 11.7059 - photo_gen_loss: 37.1607 - monet_disc_loss: 0.6903 - photo_disc_loss: 0.2913
Epoch 29/40
30/30 - 12s - monet_gen_loss: 11.7480 - photo_gen_loss: 17.6332 - monet_disc_loss: 0.6815 - photo_disc_loss: 0.1332
Epoch 30/40
30/30 - 12s - monet_gen_loss: 11.6897 - photo_gen_loss: 37.0722 - monet_disc_loss: 0.6489 - photo_disc_loss: 0.3363
Epoch 31/40
30/30 - 12s - monet_gen_loss: 11.7522 - photo_gen_loss: 18.3013 - monet_disc_loss: 0.6598 - photo_disc_loss: 0.1000
Epoch 32/40
30/30 - 12s - monet_gen_loss: 11.5854 - photo_gen_loss: 26.8533 - monet_disc_loss: 0.6305 - photo_disc_loss: 0.1396
Epoch 33/40
30/30 - 12s - monet_gen_loss: 11.7264 - photo_gen_loss: 20.1829 - monet_disc_loss: 0.6230 - photo_disc_loss: 0.0925
Epoch 34/40
30/30 - 12s - monet_gen_loss: 11.7881 - photo_gen_loss: 31.6619 - monet_disc_loss: 0.7273 - photo_disc_loss: 0.2341
Epoch 35/40
30/30 - 12s - monet_gen_loss: 11.8650 - photo_gen_loss: 18.1558 - monet_disc_loss: 0.6673 - photo_disc_loss: 0.0993
Epoch 36/40
30/30 - 12s - monet_gen_loss: 11.9370 - photo_gen_loss: 32.3746 - monet_disc_loss: 0.6672 - photo_disc_loss: 0.2542
Epoch 37/40
30/30 - 12s - monet_gen_loss: 12.6737 - photo_gen_loss: 18.2660 - monet_disc_loss: 0.4543 - photo_disc_loss: 0.0957
Epoch 38/40
30/30 - 12s - monet_gen_loss: 11.8672 - photo_gen_loss: 28.0183 - monet_disc_loss: 0.6665 - photo_disc_loss: 0.1922
Epoch 39/40
30/30 - 12s - monet_gen_loss: 11.7848 - photo_gen_loss: 19.3192 - monet_disc_loss: 0.6905 - photo_disc_loss: 0.0856
Epoch 40/40
30/30 - 12s - monet_gen_loss: 12.0494 - photo_gen_loss: 28.3169 - monet_disc_loss: 0.6432 - photo_disc_loss: 0.1670
*** trained cycle gan final losses ***
{
    "monet_gen_loss": "12.049357",
    "photo_gen_loss": "28.316858",
    "monet_disc_loss": "0.64316076",
    "photo_disc_loss": "0.16703498"
}
*** Show trained model predictions sample ***
choose_30_images_methods loop (curr_method = 'closest_images_by_earth_movers_distance'): 100%|██████████| 7/7 [1:34:38<00:00, 811.27s/it]